devices: filter vhost-user-blk/scsi devices

The reserved Linux device major numbers 241 and 242
are used to identify vhost-user-blk and vhost-user-scsi
devices, respectively.
For example, after running the command:
mknod <Vhost-User-Dir>/block/devices/vhost-dev0 b 241 0
this node will be recognized as a vhost-user-blk device.

Fixes: #2380

Signed-off-by: Liu Xiaodong <xiaodong.liu@intel.com>
Liu Xiaodong 2018-02-22 01:12:13 -05:00
parent 54b24dad4d
commit 3696318436
11 changed files with 241 additions and 17 deletions
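To make the convention from the commit message concrete, below is a minimal, self-contained Go sketch (not part of this commit) of how a device node's major-minor numbers can be mapped to its vhost-user socket: the node under "<store>/block/devices/" and the socket under "<store>/block/sockets/" share a file name, so matching the node's device numbers yields the socket. The store path in main is only an illustrative placeholder.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// resolveVhostUserSocket walks "<store>/block/devices/", finds the node whose
// major-minor numbers match, and returns the socket of the same name under
// "<store>/block/sockets/".
func resolveVhostUserSocket(store string, major, minor uint32) (string, error) {
	devDir := filepath.Join(store, "block", "devices")
	sockDir := filepath.Join(store, "block", "sockets")

	entries, err := ioutil.ReadDir(devDir)
	if err != nil {
		return "", err
	}
	for _, entry := range entries {
		var st unix.Stat_t
		if err := unix.Stat(filepath.Join(devDir, entry.Name()), &st); err != nil {
			return "", err
		}
		if unix.Major(st.Rdev) == major && unix.Minor(st.Rdev) == minor {
			// The device node and its socket share the same file name.
			sock := filepath.Join(sockDir, entry.Name())
			if _, err := os.Stat(sock); err != nil {
				return "", err
			}
			return sock, nil
		}
	}
	return "", fmt.Errorf("no device node %d:%d under %s", major, minor, devDir)
}

func main() {
	// "/tmp/vhost-user-store" is a placeholder path; 241 is the vhost-user-blk
	// major number reserved by this commit.
	sock, err := resolveVhostUserSocket("/tmp/vhost-user-store", 241, 0)
	fmt.Println(sock, err)
}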

View File

@@ -87,7 +87,7 @@ func TestContainerRemoveDrive(t *testing.T) {
sandbox := &Sandbox{
ctx: context.Background(),
id: "sandbox",
devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
devManager: manager.NewDeviceManager(manager.VirtioSCSI, false, "", nil),
config: &SandboxConfig{},
}
@@ -306,7 +306,7 @@ func TestContainerAddDriveDir(t *testing.T) {
sandbox := &Sandbox{
ctx: context.Background(),
id: testSandboxID,
devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
devManager: manager.NewDeviceManager(manager.VirtioSCSI, false, "", nil),
hypervisor: &mockHypervisor{},
agent: &noopAgent{},
config: &SandboxConfig{

View File

@@ -8,11 +8,13 @@ package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"github.com/go-ini/ini"
"golang.org/x/sys/unix"
)
// DeviceType indicates device type
@@ -66,6 +68,14 @@ const (
VirtioFS = "virtio-fs"
)
const (
// The OCI spec requires the major-minor number to be provided for a
// device. We have chosen the below major numbers to represent
// vhost-user devices.
VhostUserBlkMajor = 241
VhostUserSCSIMajor = 242
)
// Defining these as a variable instead of a const, to allow
// overriding this in the tests.
@@ -223,15 +233,26 @@ type VhostUserDeviceAttrs struct {
Tag string
CacheSize uint32
Cache string
// PCIAddr is the PCI address used to identify the slot at which the drive is attached.
// It is only meaningful for vhost user block devices
PCIAddr string
// Block index of the device if assigned
Index int
}
// GetHostPathFunc is a function pointer used to mock GetHostPath in tests.
var GetHostPathFunc = GetHostPath
// GetVhostUserNodeStatFunc is a function pointer used to mock GetVhostUserNodeStat
// in tests. Through this function, callers can obtain device type information.
var GetVhostUserNodeStatFunc = GetVhostUserNodeStat
// GetHostPath is used to fetch the host path for the device.
// The path passed in the spec refers to the path that should appear inside the container.
// We need to find the actual device path on the host based on the major-minor numbers of the device.
func GetHostPath(devInfo DeviceInfo) (string, error) {
func GetHostPath(devInfo DeviceInfo, vhostUserStoreEnabled bool, vhostUserStorePath string) (string, error) {
if devInfo.ContainerPath == "" {
return "", fmt.Errorf("Empty path provided for device")
}
@@ -249,6 +270,12 @@ func GetHostPath(devInfo DeviceInfo) (string, error) {
return "", nil
}
// Filter out vhost-user storage devices by device Major numbers.
if vhostUserStoreEnabled && devInfo.DevType == "b" &&
(devInfo.Major == VhostUserSCSIMajor || devInfo.Major == VhostUserBlkMajor) {
return getVhostUserHostPath(devInfo, vhostUserStorePath)
}
format := strconv.FormatInt(devInfo.Major, 10) + ":" + strconv.FormatInt(devInfo.Minor, 10)
sysDevPath := filepath.Join(SysDevPrefix, pathComp, format, "uevent")
@@ -278,3 +305,58 @@ func GetHostPath(devInfo DeviceInfo) (string, error) {
return filepath.Join("/dev", devName.String()), nil
}
// getVhostUserHostPath is used to fetch the host path for a vhost-user device.
// For a vhost-user block device such as vhost-user-blk or vhost-user-scsi, its
// socket should be under the directory "<vhostUserStorePath>/block/sockets/";
// its corresponding device node should be under the directory
// "<vhostUserStorePath>/block/devices/".
func getVhostUserHostPath(devInfo DeviceInfo, vhostUserStorePath string) (string, error) {
vhostUserDevNodePath := filepath.Join(vhostUserStorePath, "/block/devices/")
vhostUserSockPath := filepath.Join(vhostUserStorePath, "/block/sockets/")
sockFileName, err := getVhostUserDevName(vhostUserDevNodePath,
uint32(devInfo.Major), uint32(devInfo.Minor))
if err != nil {
return "", err
}
// Locate socket path of vhost-user device
sockFilePath := filepath.Join(vhostUserSockPath, sockFileName)
if _, err = os.Stat(sockFilePath); os.IsNotExist(err) {
return "", err
}
return sockFilePath, nil
}
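// GetVhostUserNodeStat returns stat information for the given device node.
// It is invoked through GetVhostUserNodeStatFunc so that tests can mock it.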
func GetVhostUserNodeStat(devNodePath string, devNodeStat *unix.Stat_t) (err error) {
return unix.Stat(devNodePath, devNodeStat)
}
// getVhostUserDevName returns the name of the device node under the given
// directory whose device number matches majorNum:minorNum.
func getVhostUserDevName(dirname string, majorNum, minorNum uint32) (string, error) {
files, err := ioutil.ReadDir(dirname)
if err != nil {
return "", err
}
for _, file := range files {
var devStat unix.Stat_t
devFilePath := filepath.Join(dirname, file.Name())
err = GetVhostUserNodeStatFunc(devFilePath, &devStat)
if err != nil {
return "", err
}
devMajor := unix.Major(devStat.Rdev)
devMinor := unix.Minor(devStat.Rdev)
if devMajor == majorNum && devMinor == minorNum {
return file.Name(), nil
}
}
return "", fmt.Errorf("Required device node (%d:%d) doesn't exist under directory %s",
majorNum, minorNum, dirname)
}

View File

@@ -47,7 +47,9 @@ var (
)
type deviceManager struct {
blockDriver string
blockDriver string
vhostUserStoreEnabled bool
vhostUserStorePath string
devices map[string]api.Device
sync.RWMutex
@@ -58,9 +60,11 @@ func deviceLogger() *logrus.Entry {
}
// NewDeviceManager creates a deviceManager object that behaves as an api.DeviceManager
func NewDeviceManager(blockDriver string, devices []api.Device) api.DeviceManager {
func NewDeviceManager(blockDriver string, vhostUserStoreEnabled bool, vhostUserStorePath string, devices []api.Device) api.DeviceManager {
dm := &deviceManager{
devices: make(map[string]api.Device),
vhostUserStoreEnabled: vhostUserStoreEnabled,
vhostUserStorePath: vhostUserStorePath,
devices: make(map[string]api.Device),
}
if blockDriver == VirtioMmio {
dm.blockDriver = VirtioMmio
@@ -94,7 +98,7 @@ func (dm *deviceManager) findDeviceByMajorMinor(major, minor int64) api.Device {
// createDevice creates one device based on DeviceInfo
func (dm *deviceManager) createDevice(devInfo config.DeviceInfo) (dev api.Device, err error) {
path, err := config.GetHostPathFunc(devInfo)
path, err := config.GetHostPathFunc(devInfo, dm.vhostUserStoreEnabled, dm.vhostUserStorePath)
if err != nil {
return nil, err
}

View File

@@ -7,17 +7,20 @@
package manager
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
"github.com/stretchr/testify/assert"
ktu "github.com/kata-containers/runtime/pkg/katatestutils"
"github.com/kata-containers/runtime/virtcontainers/device/api"
"github.com/kata-containers/runtime/virtcontainers/device/config"
"github.com/kata-containers/runtime/virtcontainers/device/drivers"
"github.com/stretchr/testify/assert"
"golang.org/x/sys/unix"
)
const fileMode0640 = os.FileMode(0640)
@@ -202,8 +205,85 @@ func TestAttachBlockDevice(t *testing.T) {
assert.Nil(t, err)
}
func TestAttachVhostUserBlkDevice(t *testing.T) {
rootEnabled := true
tc := ktu.NewTestConstraint(false)
if tc.NotValid(ktu.NeedRoot()) {
rootEnabled = false
}
tmpDir, err := ioutil.TempDir("", "")
dm := &deviceManager{
blockDriver: VirtioBlock,
devices: make(map[string]api.Device),
vhostUserStoreEnabled: true,
vhostUserStorePath: tmpDir,
}
assert.Nil(t, err)
os.RemoveAll(tmpDir)
vhostUserDevNodePath := filepath.Join(tmpDir, "/block/devices/")
vhostUserSockPath := filepath.Join(tmpDir, "/block/sockets/")
deviceNodePath := filepath.Join(vhostUserDevNodePath, "vhostblk0")
deviceSockPath := filepath.Join(vhostUserSockPath, "vhostblk0")
err = os.MkdirAll(vhostUserDevNodePath, dirMode)
assert.Nil(t, err)
err = os.MkdirAll(vhostUserSockPath, dirMode)
assert.Nil(t, err)
_, err = os.Create(deviceSockPath)
assert.Nil(t, err)
// mknod requires root privilege; when not running as root, use the mock
// function instead to return the VhostUserBlk device type.
if rootEnabled == true {
err = unix.Mknod(deviceNodePath, unix.S_IFBLK, int(unix.Mkdev(config.VhostUserBlkMajor, 0)))
assert.Nil(t, err)
} else {
savedFunc := config.GetVhostUserNodeStatFunc
_, err = os.Create(deviceNodePath)
assert.Nil(t, err)
config.GetVhostUserNodeStatFunc = func(devNodePath string,
devNodeStat *unix.Stat_t) error {
if deviceNodePath != devNodePath {
return fmt.Errorf("mock GetVhostUserNodeStatFunc error")
}
devNodeStat.Rdev = unix.Mkdev(config.VhostUserBlkMajor, 0)
return nil
}
defer func() {
config.GetVhostUserNodeStatFunc = savedFunc
}()
}
path := "/dev/vda"
deviceInfo := config.DeviceInfo{
HostPath: deviceNodePath,
ContainerPath: path,
DevType: "b",
Major: config.VhostUserBlkMajor,
Minor: 0,
}
devReceiver := &api.MockDeviceReceiver{}
device, err := dm.NewDevice(deviceInfo)
assert.Nil(t, err)
_, ok := device.(*drivers.VhostUserBlkDevice)
assert.True(t, ok)
err = device.Attach(devReceiver)
assert.Nil(t, err)
err = device.Detach(devReceiver)
assert.Nil(t, err)
}
func TestAttachDetachDevice(t *testing.T) {
dm := NewDeviceManager(VirtioSCSI, nil)
dm := NewDeviceManager(VirtioSCSI, false, "", nil)
path := "/dev/hda"
deviceInfo := config.DeviceInfo{

View File

@@ -132,3 +132,13 @@ func isLargeBarSpace(resourcePath string) (bool, error) {
return false, nil
}
// isVhostUserBlk checks if the device is a VhostUserBlk device.
func isVhostUserBlk(devInfo config.DeviceInfo) bool {
return devInfo.Major == config.VhostUserBlkMajor
}
// isVhostUserSCSI checks if the device is a VhostUserSCSI device.
func isVhostUserSCSI(devInfo config.DeviceInfo) bool {
return devInfo.Major == config.VhostUserSCSIMajor
}

View File

@@ -53,3 +53,39 @@ func TestIsBlock(t *testing.T) {
assert.Equal(t, d.expected, isBlock)
}
}
func TestIsVhostUserBlk(t *testing.T) {
type testData struct {
major int64
expected bool
}
data := []testData{
{config.VhostUserBlkMajor, true},
{config.VhostUserSCSIMajor, false},
{240, false},
}
for _, d := range data {
isVhostUserBlk := isVhostUserBlk(config.DeviceInfo{Major: d.major})
assert.Equal(t, d.expected, isVhostUserBlk)
}
}
func TestIsVhostUserSCSI(t *testing.T) {
type testData struct {
major int64
expected bool
}
data := []testData{
{config.VhostUserBlkMajor, false},
{config.VhostUserSCSIMajor, true},
{240, false},
}
for _, d := range data {
isVhostUserSCSI := isVhostUserSCSI(config.DeviceInfo{Major: d.major})
assert.Equal(t, d.expected, isVhostUserSCSI)
}
}

View File

@@ -66,6 +66,13 @@ type VhostUserDeviceAttrs struct {
// MacAddress is only meaningful for vhost user net device
MacAddress string
// PCIAddr is the PCI address used to identify the slot at which the drive is attached.
// It is only meaningful for vhost user block devices
PCIAddr string
// Block index of the device if assigned
Index int
}
// DeviceState is sandbox level resource which represents host devices

View File

@@ -31,7 +31,7 @@ func TestSandboxRestore(t *testing.T) {
sandbox := Sandbox{
id: "test-exp",
containers: container,
devManager: manager.NewDeviceManager(manager.VirtioSCSI, nil),
devManager: manager.NewDeviceManager(manager.VirtioSCSI, false, "", nil),
hypervisor: &mockHypervisor{},
ctx: context.Background(),
config: &sconfig,

View File

@@ -60,7 +60,8 @@ func TestMinimalSandboxConfig(t *testing.T) {
savedFunc := config.GetHostPathFunc
// Simply assign container path to host path for device.
config.GetHostPathFunc = func(devInfo config.DeviceInfo) (string, error) {
config.GetHostPathFunc = func(devInfo config.DeviceInfo, vhostUserStoreEnabled bool,
vhostUserStorePath string) (string, error) {
return devInfo.ContainerPath, nil
}

View File

@@ -575,7 +575,9 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
if err != nil {
s.Logger().WithError(err).WithField("sandboxid", s.id).Warning("load sandbox devices failed")
}
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, devices)
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver,
sandboxConfig.HypervisorConfig.EnableVhostUserStore,
sandboxConfig.HypervisorConfig.VhostUserStorePath, devices)
// Load sandbox state. The hypervisor.createSandbox call may need to access state.
state, err := s.store.LoadState()
@@ -587,7 +589,9 @@ func newSandbox(ctx context.Context, sandboxConfig SandboxConfig, factory Factor
return nil, err
}
} else {
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver, nil)
s.devManager = deviceManager.NewDeviceManager(sandboxConfig.HypervisorConfig.BlockDeviceDriver,
sandboxConfig.HypervisorConfig.EnableVhostUserStore,
sandboxConfig.HypervisorConfig.VhostUserStorePath, nil)
// Ignore the error. Restore can fail for a new sandbox
if err := s.Restore(); err != nil {

View File

@@ -696,7 +696,7 @@ func TestSandboxAttachDevicesVFIO(t *testing.T) {
config.SysIOMMUPath = savedIOMMUPath
}()
dm := manager.NewDeviceManager(manager.VirtioSCSI, nil)
dm := manager.NewDeviceManager(manager.VirtioSCSI, false, "", nil)
path := filepath.Join(vfioPath, testFDIOGroup)
deviceInfo := config.DeviceInfo{
HostPath: path,
@@ -1174,7 +1174,7 @@ func TestAttachBlockDevice(t *testing.T) {
DevType: "b",
}
dm := manager.NewDeviceManager(config.VirtioBlock, nil)
dm := manager.NewDeviceManager(config.VirtioBlock, false, "", nil)
device, err := dm.NewDevice(deviceInfo)
assert.Nil(t, err)
_, ok := device.(*drivers.BlockDevice)
@@ -1230,7 +1230,7 @@ func TestPreAddDevice(t *testing.T) {
HypervisorConfig: hConfig,
}
dm := manager.NewDeviceManager(config.VirtioBlock, nil)
dm := manager.NewDeviceManager(config.VirtioBlock, false, "", nil)
// create a sandbox first
sandbox := &Sandbox{
id: testSandboxID,