runtime: add reconnect timeout for vhost user block

Fixes: #6075
Signed-off-by: zhaojizhuang <571130360@qq.com>
This commit is contained in:
zhaojizhuang 2023-01-17 11:23:36 +08:00 committed by zhaojizhuang
parent 95602c8c08
commit ca02c9f512
21 changed files with 144 additions and 76 deletions

View File

@ -57,6 +57,7 @@ There are several kinds of Kata configurations and they are listed below.
| `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver | | `io.katacontainers.config.hypervisor.enable_iothreads` | `boolean`| enable IO to be processed in a separate thread. Supported currently for virtio-`scsi` driver |
| `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor | | `io.katacontainers.config.hypervisor.enable_mem_prealloc` | `boolean` | the memory space used for `nvdimm` device by the hypervisor |
| `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) | | `io.katacontainers.config.hypervisor.enable_vhost_user_store` | `boolean` | enable vhost-user storage device (QEMU) |
| `io.katacontainers.config.hypervisor.vhost_user_reconnect_timeout_sec` | `uint32`| the timeout in seconds for reconnecting the vhost user socket (QEMU) | | `io.katacontainers.config.hypervisor.vhost_user_reconnect_timeout_sec` | `uint32`| the timeout in seconds for reconnecting the vhost user socket (QEMU) |
| `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) | | `io.katacontainers.config.hypervisor.enable_virtio_mem` | `boolean` | enable virtio-mem (QEMU) |
| `io.katacontainers.config.hypervisor.entropy_source` (R) | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) | | `io.katacontainers.config.hypervisor.entropy_source` (R) | string| the path to a host source of entropy (`/dev/random`, `/dev/urandom` or real hardware RNG device) |
| `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory | | `io.katacontainers.config.hypervisor.file_mem_backend` (R) | string | file based memory backend root directory |

View File

@ -301,6 +301,11 @@ vhost_user_store_path = "@DEFVHOSTUSERSTOREPATH@"
# Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@ # Your distribution recommends: @DEFVALIDVHOSTUSERSTOREPATHS@
valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@ valid_vhost_user_store_paths = @DEFVALIDVHOSTUSERSTOREPATHS@
# The timeout in seconds for reconnecting on non-server SPDK sockets when the remote end goes away.
# QEMU will delay this many seconds and then attempt to reconnect.
# Zero disables reconnecting, and is the default.
vhost_user_reconnect_timeout_sec = 0
# Enable file based guest memory support. The default is an empty string which # Enable file based guest memory support. The default is an empty string which
# will disable this feature. In the case of virtio-fs, this is enabled # will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder. # automatically and '/dev/shm' is used as the backing folder.

View File

@ -87,6 +87,8 @@ const (
// Define the string key for DriverOptions in DeviceInfo struct // Define the string key for DriverOptions in DeviceInfo struct
FsTypeOpt = "fstype" FsTypeOpt = "fstype"
BlockDriverOpt = "block-driver" BlockDriverOpt = "block-driver"
VhostUserReconnectTimeOutOpt = "vhost-user-reconnect-timeout"
) )
const ( const (
@ -97,6 +99,15 @@ const (
VhostUserSCSIMajor = 242 VhostUserSCSIMajor = 242
) )
const (
	// DefaultVhostUserReconnectTimeOut is the default timeout, in seconds,
	// for reconnecting on non-server vhost-user sockets when the remote
	// end goes away.
	// QEMU will delay this many seconds and then attempt to reconnect.
	// Zero disables reconnecting, and is the default.
	DefaultVhostUserReconnectTimeOut = 0
)
// Defining these as a variable instead of a const, to allow // Defining these as a variable instead of a const, to allow
// overriding this in the tests. // overriding this in the tests.
@ -320,6 +331,9 @@ type VhostUserDeviceAttrs struct {
CacheSize uint32 CacheSize uint32
QueueSize uint32 QueueSize uint32
// Reconnect timeout for socket of vhost user block device
ReconnectTime uint32
} }
// GetHostPathFunc is function pointer used to mock GetHostPath in tests. // GetHostPathFunc is function pointer used to mock GetHostPath in tests.

View File

@ -8,6 +8,7 @@ package drivers
import ( import (
"context" "context"
"strconv"
"github.com/kata-containers/kata-containers/src/runtime/pkg/device/api" "github.com/kata-containers/kata-containers/src/runtime/pkg/device/api"
"github.com/kata-containers/kata-containers/src/runtime/pkg/device/config" "github.com/kata-containers/kata-containers/src/runtime/pkg/device/config"
@ -76,6 +77,7 @@ func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.De
SocketPath: device.DeviceInfo.HostPath, SocketPath: device.DeviceInfo.HostPath,
Type: config.VhostUserBlk, Type: config.VhostUserBlk,
Index: index, Index: index,
ReconnectTime: vhostUserReconnect(device.DeviceInfo.DriverOptions),
} }
deviceLogger().WithFields(logrus.Fields{ deviceLogger().WithFields(logrus.Fields{
@ -83,6 +85,7 @@ func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.De
"SocketPath": vAttrs.SocketPath, "SocketPath": vAttrs.SocketPath,
"Type": config.VhostUserBlk, "Type": config.VhostUserBlk,
"Index": index, "Index": index,
"ReconnectTime": vAttrs.ReconnectTime,
}).Info("Attaching device") }).Info("Attaching device")
device.VhostUserDeviceAttrs = vAttrs device.VhostUserDeviceAttrs = vAttrs
@ -93,6 +96,24 @@ func (device *VhostUserBlkDevice) Attach(ctx context.Context, devReceiver api.De
return nil return nil
} }
// vhostUserReconnect returns the vhost-user reconnect timeout (in seconds)
// carried in the device's driver options, falling back to
// config.DefaultVhostUserReconnectTimeOut when the options map is nil or the
// option value cannot be parsed as an unsigned 32-bit integer.
func vhostUserReconnect(customOptions map[string]string) uint32 {
	if customOptions == nil {
		return config.DefaultVhostUserReconnectTimeOut
	}

	reconnectTimeoutStr := customOptions[config.VhostUserReconnectTimeOutOpt]
	// ParseUint with bitSize 32 rejects negative values and values larger
	// than math.MaxUint32; the previous Atoi + uint32() cast silently
	// wrapped them (e.g. "-1" became 4294967295).
	reconnectTimeout, err := strconv.ParseUint(reconnectTimeoutStr, 10, 32)
	if err != nil {
		deviceLogger().WithField("reconnect", reconnectTimeoutStr).WithError(err).Warn("Failed to get reconnect timeout for vhost-user-blk device")
		return config.DefaultVhostUserReconnectTimeOut
	}

	return uint32(reconnectTimeout)
}
func isVirtioBlkBlockDriver(customOptions map[string]string) bool { func isVirtioBlkBlockDriver(customOptions map[string]string) bool {
var blockDriverOption string var blockDriverOption string

View File

@ -10,6 +10,7 @@ import (
"context" "context"
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt"
"sync" "sync"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
@ -42,6 +43,8 @@ type deviceManager struct {
sync.RWMutex sync.RWMutex
vhostUserStoreEnabled bool vhostUserStoreEnabled bool
vhostUserReconnectTimeout uint32
} }
func deviceLogger() *logrus.Entry { func deviceLogger() *logrus.Entry {
@ -49,10 +52,11 @@ func deviceLogger() *logrus.Entry {
} }
// NewDeviceManager creates a deviceManager object behaved as api.DeviceManager // NewDeviceManager creates a deviceManager object behaved as api.DeviceManager
func NewDeviceManager(blockDriver string, vhostUserStoreEnabled bool, vhostUserStorePath string, devices []api.Device) api.DeviceManager { func NewDeviceManager(blockDriver string, vhostUserStoreEnabled bool, vhostUserStorePath string, vhostUserReconnect uint32, devices []api.Device) api.DeviceManager {
dm := &deviceManager{ dm := &deviceManager{
vhostUserStoreEnabled: vhostUserStoreEnabled, vhostUserStoreEnabled: vhostUserStoreEnabled,
vhostUserStorePath: vhostUserStorePath, vhostUserStorePath: vhostUserStorePath,
vhostUserReconnectTimeout: vhostUserReconnect,
devices: make(map[string]api.Device), devices: make(map[string]api.Device),
} }
if blockDriver == config.VirtioMmio { if blockDriver == config.VirtioMmio {
@ -119,6 +123,7 @@ func (dm *deviceManager) createDevice(devInfo config.DeviceInfo) (dev api.Device
devInfo.DriverOptions = make(map[string]string) devInfo.DriverOptions = make(map[string]string)
} }
devInfo.DriverOptions[config.BlockDriverOpt] = dm.blockDriver devInfo.DriverOptions[config.BlockDriverOpt] = dm.blockDriver
devInfo.DriverOptions[config.VhostUserReconnectTimeOutOpt] = fmt.Sprintf("%d", dm.vhostUserReconnectTimeout)
return drivers.NewVhostUserBlkDevice(&devInfo), nil return drivers.NewVhostUserBlkDevice(&devInfo), nil
} else if isBlock(devInfo) { } else if isBlock(devInfo) {
if devInfo.DriverOptions == nil { if devInfo.DriverOptions == nil {

View File

@ -208,7 +208,7 @@ func TestAttachBlockDevice(t *testing.T) {
} }
func TestAttachDetachDevice(t *testing.T) { func TestAttachDetachDevice(t *testing.T) {
dm := NewDeviceManager(config.VirtioSCSI, false, "", nil) dm := NewDeviceManager(config.VirtioSCSI, false, "", 0, nil)
path := "/dev/hda" path := "/dev/hda"
deviceInfo := config.DeviceInfo{ deviceInfo := config.DeviceInfo{

View File

@ -1524,7 +1524,7 @@ func (q *QMP) ExecuteGetFD(ctx context.Context, fdname string, fd *os.File) erro
// ExecuteCharDevUnixSocketAdd adds a character device using as backend a unix socket, // ExecuteCharDevUnixSocketAdd adds a character device using as backend a unix socket,
// id is an identifier for the device, path specifies the local path of the unix socket, // id is an identifier for the device, path specifies the local path of the unix socket,
// wait is to block waiting for a client to connect, server specifies that the socket is a listening socket. // wait is to block waiting for a client to connect, server specifies that the socket is a listening socket.
func (q *QMP) ExecuteCharDevUnixSocketAdd(ctx context.Context, id, path string, wait, server bool) error { func (q *QMP) ExecuteCharDevUnixSocketAdd(ctx context.Context, id, path string, wait, server bool, reconnect uint32) error {
data := map[string]interface{}{ data := map[string]interface{}{
"server": server, "server": server,
"addr": map[string]interface{}{ "addr": map[string]interface{}{
@ -1540,6 +1540,10 @@ func (q *QMP) ExecuteCharDevUnixSocketAdd(ctx context.Context, id, path string,
data["wait"] = wait data["wait"] = wait
} }
if reconnect > 0 {
data["reconnect"] = reconnect
}
args := map[string]interface{}{ args := map[string]interface{}{
"id": id, "id": id,
"backend": map[string]interface{}{ "backend": map[string]interface{}{

View File

@ -1445,7 +1445,7 @@ func TestExecuteCharDevUnixSocketAdd(t *testing.T) {
cfg := QMPConfig{Logger: qmpTestLogger{}} cfg := QMPConfig{Logger: qmpTestLogger{}}
q := startQMPLoop(buf, cfg, connectedCh, disconnectedCh) q := startQMPLoop(buf, cfg, connectedCh, disconnectedCh)
checkVersion(t, connectedCh) checkVersion(t, connectedCh)
err := q.ExecuteCharDevUnixSocketAdd(context.Background(), "foo", "foo.sock", false, true) err := q.ExecuteCharDevUnixSocketAdd(context.Background(), "foo", "foo.sock", false, true, 1)
if err != nil { if err != nil {
t.Fatalf("Unexpected error %v", err) t.Fatalf("Unexpected error %v", err)
} }

View File

@ -84,6 +84,7 @@ const defaultGuestHookPath string = ""
const defaultVirtioFSCacheMode = "never" const defaultVirtioFSCacheMode = "never"
const defaultDisableImageNvdimm = false const defaultDisableImageNvdimm = false
const defaultVhostUserStorePath string = "/var/run/kata-containers/vhost-user/" const defaultVhostUserStorePath string = "/var/run/kata-containers/vhost-user/"
const defaultVhostUserDeviceReconnect = 0
const defaultRxRateLimiterMaxRate = uint64(0) const defaultRxRateLimiterMaxRate = uint64(0)
const defaultTxRateLimiterMaxRate = uint64(0) const defaultTxRateLimiterMaxRate = uint64(0)
const defaultConfidentialGuest = false const defaultConfidentialGuest = false

View File

@ -136,6 +136,7 @@ type hypervisor struct {
BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"` BlockDeviceCacheDirect bool `toml:"block_device_cache_direct"`
BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"` BlockDeviceCacheNoflush bool `toml:"block_device_cache_noflush"`
EnableVhostUserStore bool `toml:"enable_vhost_user_store"` EnableVhostUserStore bool `toml:"enable_vhost_user_store"`
VhostUserDeviceReconnect uint32 `toml:"vhost_user_reconnect_timeout_sec"`
DisableBlockDeviceUse bool `toml:"disable_block_device_use"` DisableBlockDeviceUse bool `toml:"disable_block_device_use"`
MemPrealloc bool `toml:"enable_mem_prealloc"` MemPrealloc bool `toml:"enable_mem_prealloc"`
HugePages bool `toml:"enable_hugepages"` HugePages bool `toml:"enable_hugepages"`
@ -1269,6 +1270,7 @@ func GetDefaultHypervisorConfig() vc.HypervisorConfig {
PCIeRootPort: defaultPCIeRootPort, PCIeRootPort: defaultPCIeRootPort,
GuestHookPath: defaultGuestHookPath, GuestHookPath: defaultGuestHookPath,
VhostUserStorePath: defaultVhostUserStorePath, VhostUserStorePath: defaultVhostUserStorePath,
VhostUserDeviceReconnect: defaultVhostUserDeviceReconnect,
VirtioFSCache: defaultVirtioFSCacheMode, VirtioFSCache: defaultVirtioFSCacheMode,
DisableImageNvdimm: defaultDisableImageNvdimm, DisableImageNvdimm: defaultDisableImageNvdimm,
RxRateLimiterMaxRate: defaultRxRateLimiterMaxRate, RxRateLimiterMaxRate: defaultRxRateLimiterMaxRate,

View File

@ -478,6 +478,12 @@ func addHypervisorConfigOverrides(ocispec specs.Spec, config *vc.SandboxConfig,
return err return err
} }
if err := newAnnotationConfiguration(ocispec, vcAnnotations.VhostUserDeviceReconnect).setUint(func(reconnect uint64) {
config.HypervisorConfig.VhostUserDeviceReconnect = uint32(reconnect)
}); err != nil {
return err
}
if value, ok := ocispec.Annotations[vcAnnotations.GuestHookPath]; ok { if value, ok := ocispec.Annotations[vcAnnotations.GuestHookPath]; ok {
if value != "" { if value != "" {
config.HypervisorConfig.GuestHookPath = value config.HypervisorConfig.GuestHookPath = value

View File

@ -216,7 +216,7 @@ func TestContainerAddDriveDir(t *testing.T) {
sandbox := &Sandbox{ sandbox := &Sandbox{
ctx: context.Background(), ctx: context.Background(),
id: testSandboxID, id: testSandboxID,
devManager: manager.NewDeviceManager(config.VirtioSCSI, false, "", nil), devManager: manager.NewDeviceManager(config.VirtioSCSI, false, "", 0, nil),
hypervisor: &mockHypervisor{}, hypervisor: &mockHypervisor{},
agent: &mockAgent{}, agent: &mockAgent{},
config: &SandboxConfig{ config: &SandboxConfig{

View File

@ -82,7 +82,7 @@ func TestContainerRemoveDrive(t *testing.T) {
sandbox := &Sandbox{ sandbox := &Sandbox{
ctx: context.Background(), ctx: context.Background(),
id: "sandbox", id: "sandbox",
devManager: manager.NewDeviceManager(config.VirtioSCSI, false, "", nil), devManager: manager.NewDeviceManager(config.VirtioSCSI, false, "", 0, nil),
config: &SandboxConfig{}, config: &SandboxConfig{},
} }

View File

@ -302,6 +302,7 @@ type Param struct {
} }
// HypervisorConfig is the hypervisor configuration. // HypervisorConfig is the hypervisor configuration.
// nolint: govet
type HypervisorConfig struct { type HypervisorConfig struct {
// customAssets is a map of assets. // customAssets is a map of assets.
// Each value in that map takes precedence over the configured assets. // Each value in that map takes precedence over the configured assets.
@ -387,6 +388,10 @@ type HypervisorConfig struct {
// related folders, sockets and device nodes should be. // related folders, sockets and device nodes should be.
VhostUserStorePath string VhostUserStorePath string
// VhostUserDeviceReconnect is the timeout for reconnecting on non-server spdk sockets
// when the remote end goes away. Zero disables reconnecting.
VhostUserDeviceReconnect uint32
// GuestCoredumpPath is the path in host for saving guest memory dump // GuestCoredumpPath is the path in host for saving guest memory dump
GuestMemoryDumpPath string GuestMemoryDumpPath string

View File

@ -408,7 +408,7 @@ func TestHandleBlockVolume(t *testing.T) {
mounts = append(mounts, vMount, bMount, dMount) mounts = append(mounts, vMount, bMount, dMount)
tmpDir := "/vhost/user/dir" tmpDir := "/vhost/user/dir"
dm := manager.NewDeviceManager(config.VirtioBlock, true, tmpDir, devices) dm := manager.NewDeviceManager(config.VirtioBlock, true, tmpDir, 0, devices)
sConfig := SandboxConfig{} sConfig := SandboxConfig{}
sConfig.HypervisorConfig.BlockDeviceDriver = config.VirtioBlock sConfig.HypervisorConfig.BlockDeviceDriver = config.VirtioBlock
@ -466,7 +466,7 @@ func TestAppendDevicesEmptyContainerDeviceList(t *testing.T) {
c := &Container{ c := &Container{
sandbox: &Sandbox{ sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-scsi", false, "", nil), devManager: manager.NewDeviceManager("virtio-scsi", false, "", 0, nil),
}, },
devices: ctrDevices, devices: ctrDevices,
} }
@ -499,7 +499,7 @@ func TestAppendDevices(t *testing.T) {
c := &Container{ c := &Container{
sandbox: &Sandbox{ sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-blk", false, "", ctrDevices), devManager: manager.NewDeviceManager("virtio-blk", false, "", 0, ctrDevices),
config: sandboxConfig, config: sandboxConfig,
}, },
} }
@ -547,7 +547,7 @@ func TestAppendVhostUserBlkDevices(t *testing.T) {
testVhostUserStorePath := "/test/vhost/user/store/path" testVhostUserStorePath := "/test/vhost/user/store/path"
c := &Container{ c := &Container{
sandbox: &Sandbox{ sandbox: &Sandbox{
devManager: manager.NewDeviceManager("virtio-blk", true, testVhostUserStorePath, ctrDevices), devManager: manager.NewDeviceManager("virtio-blk", true, testVhostUserStorePath, 0, ctrDevices),
config: sandboxConfig, config: sandboxConfig,
}, },
} }

View File

@ -32,7 +32,7 @@ func TestSandboxRestore(t *testing.T) {
sandbox := Sandbox{ sandbox := Sandbox{
id: "test-exp", id: "test-exp",
containers: container, containers: container,
devManager: manager.NewDeviceManager(config.VirtioSCSI, false, "", nil), devManager: manager.NewDeviceManager(config.VirtioSCSI, false, "", 0, nil),
hypervisor: &mockHypervisor{}, hypervisor: &mockHypervisor{},
network: network, network: network,
ctx: context.Background(), ctx: context.Background(),

View File

@ -112,6 +112,10 @@ const (
// related folders, sockets and device nodes should be. // related folders, sockets and device nodes should be.
VhostUserStorePath = kataAnnotHypervisorPrefix + "vhost_user_store_path" VhostUserStorePath = kataAnnotHypervisorPrefix + "vhost_user_store_path"
// VhostUserDeviceReconnect is a sandbox annotation to specify the timeout for reconnecting on
// non-server sockets when the remote end goes away.
VhostUserDeviceReconnect = kataAnnotHypervisorPrefix + "vhost_user_reconnect_timeout_sec"
// GuestHookPath is a sandbox annotation to specify the path within the VM that will be used for 'drop-in' hooks. // GuestHookPath is a sandbox annotation to specify the path within the VM that will be used for 'drop-in' hooks.
GuestHookPath = kataAnnotHypervisorPrefix + "guest_hook_path" GuestHookPath = kataAnnotHypervisorPrefix + "guest_hook_path"

View File

@ -1513,7 +1513,7 @@ func (q *qemu) hotplugAddBlockDevice(ctx context.Context, drive *config.BlockDri
} }
func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op Operation, devID string) (err error) { func (q *qemu) hotplugAddVhostUserBlkDevice(ctx context.Context, vAttr *config.VhostUserDeviceAttrs, op Operation, devID string) (err error) {
err = q.qmpMonitorCh.qmp.ExecuteCharDevUnixSocketAdd(q.qmpMonitorCh.ctx, vAttr.DevID, vAttr.SocketPath, false, false) err = q.qmpMonitorCh.qmp.ExecuteCharDevUnixSocketAdd(q.qmpMonitorCh.ctx, vAttr.DevID, vAttr.SocketPath, false, false, vAttr.ReconnectTime)
if err != nil { if err != nil {
return err return err
} }

View File

@ -600,7 +600,7 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver,
sandboxConfig.HypervisorConfig.EnableVhostUserStore, sandboxConfig.HypervisorConfig.EnableVhostUserStore,
sandboxConfig.HypervisorConfig.VhostUserStorePath, nil) sandboxConfig.HypervisorConfig.VhostUserStorePath, sandboxConfig.HypervisorConfig.VhostUserDeviceReconnect, nil)
// Create the sandbox resource controllers. // Create the sandbox resource controllers.
if err := s.createResourceController(); err != nil { if err := s.createResourceController(); err != nil {

View File

@ -30,7 +30,7 @@ func TestSandboxAttachDevicesVhostUserBlk(t *testing.T) {
tmpDir := t.TempDir() tmpDir := t.TempDir()
os.RemoveAll(tmpDir) os.RemoveAll(tmpDir)
dm := manager.NewDeviceManager(config.VirtioSCSI, true, tmpDir, nil) dm := manager.NewDeviceManager(config.VirtioSCSI, true, tmpDir, 0, nil)
vhostUserDevNodePath := filepath.Join(tmpDir, "/block/devices/") vhostUserDevNodePath := filepath.Join(tmpDir, "/block/devices/")
vhostUserSockPath := filepath.Join(tmpDir, "/block/sockets/") vhostUserSockPath := filepath.Join(tmpDir, "/block/sockets/")

View File

@ -541,7 +541,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
config.SysIOMMUPath = savedIOMMUPath config.SysIOMMUPath = savedIOMMUPath
}() }()
dm := manager.NewDeviceManager(config.VirtioSCSI, false, "", nil) dm := manager.NewDeviceManager(config.VirtioSCSI, false, "", 0, nil)
path := filepath.Join(vfioPath, testFDIOGroup) path := filepath.Join(vfioPath, testFDIOGroup)
deviceInfo := config.DeviceInfo{ deviceInfo := config.DeviceInfo{
HostPath: path, HostPath: path,
@ -1080,7 +1080,7 @@ func TestAttachBlockDevice(t *testing.T) {
DevType: "b", DevType: "b",
} }
dm := manager.NewDeviceManager(config.VirtioBlock, false, "", nil) dm := manager.NewDeviceManager(config.VirtioBlock, false, "", 0, nil)
device, err := dm.NewDevice(deviceInfo) device, err := dm.NewDevice(deviceInfo)
assert.Nil(t, err) assert.Nil(t, err)
_, ok := device.(*drivers.BlockDevice) _, ok := device.(*drivers.BlockDevice)
@ -1136,7 +1136,7 @@ func TestPreAddDevice(t *testing.T) {
HypervisorConfig: hConfig, HypervisorConfig: hConfig,
} }
dm := manager.NewDeviceManager(config.VirtioBlock, false, "", nil) dm := manager.NewDeviceManager(config.VirtioBlock, false, "", 0, nil)
// create a sandbox first // create a sandbox first
sandbox := &Sandbox{ sandbox := &Sandbox{
id: testSandboxID, id: testSandboxID,